bitkeeper revision 1.1236.1.41 (4224ab34YunoDc0_FV3T0OZPcJ0Pcw)
author kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 1 Mar 2005 17:49:40 +0000 (17:49 +0000)
committer kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 1 Mar 2005 17:49:40 +0000 (17:49 +0000)
Performance counters for hypercalls and exceptions. Perfctr histograms
for pagetable updates.
Signed-off-by: Rolf Neugebauer <rolf.neugebauer@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@cl.cam.ac.uk>
xen/arch/x86/mm.c
xen/arch/x86/x86_32/asm-offsets.c
xen/arch/x86/x86_32/entry.S
xen/arch/x86/x86_64/asm-offsets.c
xen/arch/x86/x86_64/entry.S
xen/common/perfc.c
xen/include/asm-x86/x86_32/asm_defns.h
xen/include/asm-x86/x86_64/asm_defns.h
xen/include/xen/perfc.h
xen/include/xen/perfc_defn.h

index 4a6254bac5da4a2834e062b937361ab8a21bed74..72ba2e279738df52113493661dc1e14fd92a0ed9 100644 (file)
@@ -1687,6 +1687,7 @@ int do_mmu_update(
 
     perfc_incrc(calls_to_mmu_update); 
     perfc_addc(num_page_updates, count);
+    perfc_incr_histo(bpt_updates, count, PT_UPDATES);
 
     if ( unlikely(!array_access_ok(VERIFY_READ, ureqs, count, sizeof(req))) )
     {
@@ -2234,6 +2235,9 @@ void ptwr_flush(const int which)
     int            i, cpu = smp_processor_id();
     struct exec_domain *ed = current;
     struct domain *d = ed->domain;
+#ifdef PERF_COUNTERS
+    unsigned int   modified = 0;
+#endif
 
     l1va = ptwr_info[cpu].ptinfo[which].l1va;
     ptep = (unsigned long *)&linear_pg_table[l1_linear_offset(l1va)];
@@ -2302,6 +2306,11 @@ void ptwr_flush(const int which)
         if ( likely(l1_pgentry_val(ol1e) == l1_pgentry_val(nl1e)) )
             continue;
 
+#ifdef PERF_COUNTERS
+        /* Update number of entries modified. */
+        modified++;
+#endif
+
         /*
          * Fast path for PTEs that have merely been write-protected
          * (e.g., during a Unix fork()). A strict reduction in privilege.
@@ -2343,6 +2352,8 @@ void ptwr_flush(const int which)
     }
     unmap_domain_mem(pl1e);
 
+    perfc_incr_histo(wpt_updates, modified, PT_UPDATES);
+
     /*
      * STEP 3. Reattach the L1 p.t. page into the current address space.
      */
index 839893b793751d29a121042ccaaeb9f584673267..1733fb1240e05bff12599656d60233953f32a47a 100644 (file)
@@ -4,6 +4,7 @@
  * to extract and format the required data.
  */
 
+#include <xen/config.h>
 #include <xen/sched.h>
 
 #define DEFINE(_sym, _val) \
@@ -58,6 +59,12 @@ void __dummy__(void)
     OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
     BLANK();
 
+#ifdef PERF_COUNTERS
+    OFFSET(PERFC_hypercalls, struct perfcounter, hypercalls);
+    OFFSET(PERFC_exceptions, struct perfcounter, exceptions);
+    BLANK();
+#endif
+
     OFFSET(MULTICALL_op, multicall_entry_t, op);
     OFFSET(MULTICALL_arg0, multicall_entry_t, args[0]);
     OFFSET(MULTICALL_arg1, multicall_entry_t, args[1]);
index 7d3411ef42d2296674d5d3ee8d96dfa0fe87043a..c31b3582d535075f65044d381b55c01a00b3eaa4 100644 (file)
@@ -280,8 +280,9 @@ ENTRY(hypercall)
        SAVE_ALL(b)
         sti
         GET_CURRENT(%ebx)
-       andl $(NR_hypercalls-1),%eax
-       call *SYMBOL_NAME(hypercall_table)(,%eax,4)
+        andl $(NR_hypercalls-1),%eax
+        PERFC_INCR(PERFC_hypercalls, %eax)
+        call *SYMBOL_NAME(hypercall_table)(,%eax,4)
         movl %eax,XREGS_eax(%esp)       # save the return value
 
 test_all_events:
@@ -468,6 +469,7 @@ error_code:
         movl  %esp,%edx
        pushl %edx                      # push the xen_regs pointer
        GET_CURRENT(%ebx)
+        PERFC_INCR(PERFC_exceptions, %eax)
        call  *SYMBOL_NAME(exception_table)(,%eax,4)
         addl  $4,%esp
         movl  XREGS_eflags(%esp),%eax
index 309afbc6ee74cf68fee53f5bce9235ec09598abc..40eee8be089a5f0ad598ba0f0f206069b38d061c 100644 (file)
@@ -4,6 +4,7 @@
  * to extract and format the required data.
  */
 
+#include <xen/config.h>
 #include <xen/sched.h>
 
 #define DEFINE(_sym, _val) \
@@ -62,6 +63,12 @@ void __dummy__(void)
     OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
     BLANK();
 
+#ifdef PERF_COUNTERS
+    OFFSET(PERFC_hypercalls, struct perfcounter, hypercalls);
+    OFFSET(PERFC_exceptions, struct perfcounter, exceptions);
+    BLANK();
+#endif
+
     OFFSET(MULTICALL_op, multicall_entry_t, op);
     OFFSET(MULTICALL_arg0, multicall_entry_t, args[0]);
     OFFSET(MULTICALL_arg1, multicall_entry_t, args[1]);
index c957c1b19bfae674593c7bd9489fc2bcab206620..181e2e3ce40a43a9dd02bcbde6deac34af1cc83e 100644 (file)
@@ -141,6 +141,7 @@ hypercall:
         movq  %r10,%rcx
         andq  $(NR_hypercalls-1),%rax
         leaq  SYMBOL_NAME(hypercall_table)(%rip),%r10
+        PERFC_INCR(PERFC_hypercalls, %rax)
         callq *(%r10,%rax,8)
         movq %rax,XREGS_rax(%rsp)       # save the return value
 
@@ -297,6 +298,7 @@ error_code:
         movl  XREGS_entry_vector(%rsp),%eax
         leaq  SYMBOL_NAME(exception_table)(%rip),%rdx
         GET_CURRENT(%rbx)
+        PERFC_INCR(PERFC_exceptions, %rax)
         callq *(%rdx,%rax,8)
         testb $3,XREGS_cs(%rsp)
         jz    restore_all_xen
index c55f42922ff593e117e85246aebe3c4cf82f1522..2088b45ee774e54e17cdc8b54bef77d5c005abc5 100644 (file)
@@ -67,7 +67,11 @@ void perfc_printall(unsigned char key)
                 sum += atomic_read(&counters[j]);
             printk("TOTAL[%10d]  ", sum);
             for ( j = 0; j < perfc_info[i].nr_elements; j++ )
+            {
+                if ( (j != 0) && ((j % 4) == 0) )
+                    printk("\n                   ");
                 printk("ARR%02d[%10d]  ", j, atomic_read(&counters[j]));
+            }
             counters += j;
             break;
         }
index 08932f2610fa5f6e060c3b6b50ff106049f49ac8..8cdd9c54983f1effa47fd1be1ddc814ead08b050 100644 (file)
         SET_XEN_SEGMENTS(_reg) \
         1:
 
+#ifdef PERF_COUNTERS
+#define PERFC_INCR(_name,_idx) \
+    lock incl SYMBOL_NAME(perfcounters)+_name(,_idx,4)
+#else
+#define PERFC_INCR(_name,_idx)
+#endif
+
 #endif
 
 #define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
index f9478f9190d2d58881c5aed18236351733381920..3ac95024cc3bbe0b8bf8cf3f7dd813099c648aa2 100644 (file)
         popq  %rsi; \
         popq  %rdi;
 
+#ifdef PERF_COUNTERS
+#define PERFC_INCR(_name,_idx) \
+    pushq %rdx; \
+    leaq SYMBOL_NAME(perfcounters)+_name(%rip),%rdx; \
+    lock incl (%rdx,_idx,4); \
+    popq %rdx;
+#else
+#define PERFC_INCR(_name,_idx)
+#endif
+
 #endif
 
 #define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
index f954a95f39be7b0da109a3abeb7b1e90254d060e..e691bb7f5df91275bc4dcfa60ccb19517747cbe3 100644 (file)
@@ -54,26 +54,51 @@ extern struct perfcounter perfcounters;
 
 #define perfc_value(x)    atomic_read(&perfcounters.x[0])
 #define perfc_valuec(x)   atomic_read(&perfcounters.x[smp_processor_id()])
-#define perfc_valuea(x,y) \
-  { if(y<(sizeof(perfcounters.x)/sizeof(*perfcounters.x))) \
-    atomic_read(&perfcounters.x[y]); }
+#define perfc_valuea(x,y)                                               \
+    do {                                                                \
+        if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \
+            atomic_read(&perfcounters.x[y]);                            \
+    } while ( 0 )
 #define perfc_set(x,v)    atomic_set(&perfcounters.x[0], v)
 #define perfc_setc(x,v)   atomic_set(&perfcounters.x[smp_processor_id()], v)
-#define perfc_seta(x,y,v) \
-  { if(y<(sizeof(perfcounters.x)/sizeof(*perfcounters.x))) \
-    atomic_set(&perfcounters.x[y], v); }
+#define perfc_seta(x,y,v)                                               \
+    do {                                                                \
+        if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \
+            atomic_set(&perfcounters.x[y], v);                          \
+    } while ( 0 )
 #define perfc_incr(x)     atomic_inc(&perfcounters.x[0])
 #define perfc_decr(x)     atomic_dec(&perfcounters.x[0])
 #define perfc_incrc(x)    atomic_inc(&perfcounters.x[smp_processor_id()])
-#define perfc_incra(x,y)  \
-  { if(y<(sizeof(perfcounters.x)/sizeof(*perfcounters.x))) \
-    atomic_inc(&perfcounters.x[y]); }
+#define perfc_incra(x,y)                                                \
+    do {                                                                \
+        if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \
+            atomic_inc(&perfcounters.x[y]);                             \
+    } while ( 0 )
 #define perfc_add(x,y)    atomic_add((y), &perfcounters.x[0])
 #define perfc_addc(x,y)   atomic_add((y), &perfcounters.x[smp_processor_id()])
-#define perfc_adda(x,y,z) \
-  { if(y<(sizeof(perfcounters.x)/sizeof(*perfcounters.x))) \
-    atomic_add((z), &perfcounters.x[y]); }
+#define perfc_adda(x,y,z)                                               \
+    do {                                                                \
+        if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \
+            atomic_add((z), &perfcounters.x[y]);                        \
+    } while ( 0 )
 
+/*
+ * Histogram: special treatment for 0 and 1 count. After that equally spaced 
+ * with last bucket taking the rest.
+ */
+#define perfc_incr_histo(_x,_v,_n)                                          \
+    do {                                                                    \
+        if ( (_v) == 0 )                                                    \
+            perfc_incra(_x, 0);                                             \
+        else if ( (_v) == 1 )                                               \
+            perfc_incra(_x, 1);                                             \
+        else if ( (((_v)-2) / PERFC_ ## _n ## _BUCKET_SIZE) <               \
+                  (PERFC_MAX_ ## _n - 3) )                                  \
+            perfc_incra(_x, (((_v)-2) / PERFC_ ## _n ## _BUCKET_SIZE) + 2); \
+        else                                                                \
+            perfc_incra(_x, PERFC_MAX_ ## _n - 1);                          \
+    } while ( 0 )
+    
 #else /* PERF_COUNTERS */
 
 #define perfc_value(x)    (0)
@@ -89,6 +114,7 @@ extern struct perfcounter perfcounters;
 #define perfc_add(x,y)    ((void)0)
 #define perfc_addc(x,y)   ((void)0)
 #define perfc_adda(x,y,z) ((void)0)
+#define perfc_incr_histo(x,y,z) ((void)0)
 
 #endif /* PERF_COUNTERS */
 
index a252af7ac7350725f923a4b656d67f56b9148e1b..c67c5f46b961364e738bc587259decf603399e4c 100644 (file)
@@ -36,7 +36,15 @@ PERFSTATUS( hl2_table_pages, "current # hl2 pages" )
 PERFCOUNTER_CPU( check_pagetable, "calls to check_pagetable" )
 PERFCOUNTER_CPU( check_all_pagetables, "calls to check_all_pagetables" )
 
+#define PERFC_MAX_PT_UPDATES 64
+#define PERFC_PT_UPDATES_BUCKET_SIZE 3
+PERFCOUNTER_ARRAY( wpt_updates, "writable pt updates", PERFC_MAX_PT_UPDATES )
+PERFCOUNTER_ARRAY( bpt_updates, "batched pt updates", PERFC_MAX_PT_UPDATES )
+
+PERFCOUNTER_ARRAY( hypercalls, "hypercalls", NR_hypercalls )
+PERFCOUNTER_ARRAY( exceptions, "exceptions", 32 )
+
 #define VMX_PERF_EXIT_REASON_SIZE 37
 #define VMX_PERF_VECTOR_SIZE 0x20
-PERFCOUNTER_ARRAY(vmexits, "vmexits", VMX_PERF_EXIT_REASON_SIZE )
-PERFCOUNTER_ARRAY(cause_vector, "cause vector", VMX_PERF_VECTOR_SIZE )
+PERFCOUNTER_ARRAY( vmexits, "vmexits", VMX_PERF_EXIT_REASON_SIZE )
+PERFCOUNTER_ARRAY( cause_vector, "cause vector", VMX_PERF_VECTOR_SIZE )